Protect free_list (and the free_pfns count) with a spin lock.
struct pfn_info *pf, *pf_head;
unsigned int alloc_pfns;
unsigned int req_pages;
+ unsigned long flags;
/* how many pages do we need to alloc? */
req_pages = kbytes >> (PAGE_SHIFT - 10);
+ spin_lock_irqsave(&free_list_lock, flags);
+
/* is there enough mem to serve the request? */
if(req_pages > free_pfns)
return -1;
free_pfns--;
}
+
+ spin_unlock_irqrestore(&free_list_lock, flags);
p->tot_pages = req_pages;
unsigned long max_page;
struct list_head free_list;
+/* Guards free_list and free_pfns; all users in this file take it
+   with spin_lock_irqsave(). */
+spinlock_t free_list_lock = SPIN_LOCK_UNLOCKED;
unsigned int free_pfns;
static int tlb_flush[NR_CPUS];
{
	struct pfn_info *pf;
	unsigned long page_index;
+	unsigned long flags;
	/* Zero the per-CPU TLB-flush flags and the whole frame table. */
	memset(tlb_flush, 0, sizeof(tlb_flush));
	memset(frame_table, 0, frame_table_size);
	/* Put all domain-allocatable memory on a free list. */
+	/* Locking here presumably only preserves the "free_list is touched
+	   under free_list_lock" invariant — at init time there should be no
+	   other users yet. TODO(review): confirm. */
+	spin_lock_irqsave(&free_list_lock, flags);
	INIT_LIST_HEAD(&free_list);
	/* Start past the monitor region and the frame table itself. */
	for( page_index = (MAX_MONITOR_ADDRESS + frame_table_size) >> PAGE_SHIFT;
	page_index < nr_pages;
	/* NOTE(review): the loop increment and closing paren are not visible in
	   this hunk — the excerpt looks truncated; verify against the full file.
	   Also: free_pfns is never set/incremented in the visible code — confirm
	   it is initialized elsewhere. */
	pf = list_entry(&frame_table[page_index].list, struct pfn_info, list);
	list_add_tail(&pf->list, &free_list);
	}
+	spin_unlock_irqrestore(&free_list_lock, flags);
}
#include <asm/desc.h>
#include <xeno/list.h>
#include <hypervisor-ifs/hypervisor-if.h>
+#include <xeno/spinlock.h>
/* XXX KAF: These may die eventually, but so many refs in slab.c :((( */
extern frame_table_t * frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
+/* Protects free_list and free_pfns (take with spin_lock_irqsave). */
+extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;
void init_frametable(unsigned long nr_pages);